#include <xeno/dom0_ops.h>
#include <xeno/sched.h>
#include <xeno/event.h>
+#include <asm/domain_page.h>
extern unsigned int alloc_new_dom_mem(struct task_struct *, unsigned int);
return (dom_mask == ~0UL) ? 0 : ffz(dom_mask);
}
+/*
+ * Write, into the domain's own memory, the list of machine frame numbers
+ * allocated to domain @p.  The frames are threaded into a circular list
+ * through frame_table[]->next starting at p->pg_head; the walk stops when
+ * it wraps back around to pg_head.  The list itself is stored in the
+ * domain's pages, spilling into the next page of the chain whenever the
+ * output pointer crosses a page boundary.
+ * NOTE(review): the map_domain_mem() mappings are never released here —
+ * confirm the mapping mechanism tolerates repeated maps without unmap.
+ */
+static void build_page_list(struct task_struct *p)
+{
+    unsigned long * list;
+    unsigned long curr;
+    unsigned long page;
+
+    list = (unsigned long *)map_domain_mem(p->pg_head << PAGE_SHIFT);
+    curr = p->pg_head;
+    *list++ = p->pg_head;
+    page = (frame_table + p->pg_head)->next;
+    printk(KERN_ALERT "bd240 debug: list %lx, page num %lx\n", list, page);
+    while(page != p->pg_head){
+        /* Output pointer hit a page boundary: continue the list in the
+         * next page of the domain's frame chain. */
+        if(!((unsigned long)list & (PAGE_SIZE-1))){
+            printk(KERN_ALERT "bd240 debug: list %lx, page num %lx\n", list, page);
+            curr = (frame_table + curr)->next;
+            list = (unsigned long *)map_domain_mem(curr << PAGE_SHIFT);
+        }
+        *list++ = page;
+        page = (frame_table + page)->next;
+    }
+}
+
long do_dom0_op(dom0_op_t *u_dom0_op)
{
long ret = 0;
ret = -1;
break;
}
+ build_page_list(p);
ret = p->domain;
+
+ op.u.newdomain.domain = ret;
+ op.u.newdomain.pg_head = p->pg_head;
+ copy_to_user(u_dom0_op, &op, sizeof(op));
+ printk(KERN_ALERT "bd240 debug: hyp dom0_ops: %lx, %d\n", op.u.newdomain.pg_head, op.u.newdomain.memory_kb);
+
break;
}
#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY)
+extern int new_do_process_page_updates(page_update_request_t *, int);
+
extern int nr_mods;
extern module_t *mod;
extern unsigned char *cmdline;
}
-static unsigned int alloc_new_dom_mem(struct task_struct *p, unsigned int kbytes)
+unsigned int alloc_new_dom_mem(struct task_struct *p, unsigned int kbytes)
{
struct list_head *temp;
struct pfn_info *pf, *pf_head;
/* allocate pages and build a thread through frame_table */
temp = free_list.next;
- printk("bd240 debug: DOM%d requesting %d pages\n", p->domain, req_pages);
/* allocate first page */
pf = list_entry(temp, struct pfn_info, list);
start_info_t * virt_startinfo_addr;
unsigned long virt_stack_addr;
unsigned long long time;
+ unsigned long phys_l1tab, phys_l2tab;
+ page_update_request_t * pgt_updates;
+ unsigned long count;
net_ring_t *net_ring;
+ net_vif_t *net_vif;
char *dst; // temporary
int i; // temporary
+ /* first of all, set up domain pagetables */
+ pgt_updates = (page_update_request_t *)
+ map_domain_mem(meminfo->pgt_update_arr);
+ printk(KERN_ALERT "bd240 debug: update request starting virt %lx, phys %lx\n", pgt_updates, meminfo->pgt_update_arr);
+ for(count = 0; count < meminfo->num_pgt_updates; count++){
+ printk(KERN_ALERT "bd240 debug: update pair %lx, %lx\n", pgt_updates->ptr, pgt_updates->val);
+ new_do_process_page_updates(pgt_updates, 1);
+ pgt_updates++;
+ if(!((unsigned long)pgt_updates & (PAGE_SIZE-1))){
+ pgt_updates--;
+ pgt_updates = (page_update_request_t *)map_domain_mem(
+ ((frame_table + ((unsigned long)pgt_updates >>
+ PAGE_SHIFT))->next) << PAGE_SHIFT);
+ }
+ }
+
/* entries 0xe0000000 onwards in page table must contain hypervisor
* mem mappings - set them up.
*/
- l2tab = (l2_pgentry_t *)__va(meminfo->l2_pgt_addr);
+ phys_l2tab = meminfo->l2_pgt_addr;
+ l2tab = map_domain_mem(phys_l2tab);
memcpy(l2tab + DOMAIN_ENTRIES_PER_L2_PAGETABLE,
- ((l2_pgentry_t *)idle0_pg_table) + DOMAIN_ENTRIES_PER_L2_PAGETABLE,
- (ENTRIES_PER_L2_PAGETABLE - DOMAIN_ENTRIES_PER_L2_PAGETABLE) * sizeof(l2_pgentry_t));
- p->mm.pagetable = mk_pagetable((unsigned long)l2tab);
+ ((l2_pgentry_t *)idle_pg_table[p->processor]) +
+ DOMAIN_ENTRIES_PER_L2_PAGETABLE,
+ (ENTRIES_PER_L2_PAGETABLE - DOMAIN_ENTRIES_PER_L2_PAGETABLE)
+ * sizeof(l2_pgentry_t));
+ l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
+ mk_l2_pgentry(__pa(p->mm.perdomain_pt) | PAGE_HYPERVISOR);
+ p->mm.pagetable = mk_pagetable(phys_l2tab);
/* map in the shared info structure */
- l2tab = pagetable_ptr(p->mm.pagetable) + l2_table_offset(meminfo->virt_shinfo_addr);
- l1tab = l2_pgentry_to_l1(*l2tab) + l1_table_offset(meminfo->virt_shinfo_addr);
+ phys_l2tab = pagetable_val(p->mm.pagetable) +
+ (l2_table_offset(meminfo->virt_shinfo_addr) * sizeof(l2_pgentry_t));
+ l2tab = map_domain_mem(phys_l2tab);
+ phys_l1tab = l2_pgentry_to_phys(*l2tab) +
+ (l1_table_offset(meminfo->virt_shinfo_addr) * sizeof(l1_pgentry_t));
+ l1tab = map_domain_mem(phys_l1tab);
*l1tab = mk_l1_pgentry(__pa(p->shared_info) | L1_PROT);
/* set up the shared info structure */
* new domain. thus, temporarely install its pagetables.
*/
__cli();
- __asm__ __volatile__ (
- "mov %%eax, %%cr3"
- : : "a" (__pa(pagetable_ptr(p->mm.pagetable))));
+ __asm__ __volatile__ (
+ "mov %%eax,%%cr3" : : "a" (pagetable_val(p->mm.pagetable)));
memset(virt_startinfo_addr, 0, sizeof(virt_startinfo_addr));
virt_startinfo_addr->nr_pages = p->tot_pages;
/* Add virtual network interfaces and point to them in startinfo. */
while (meminfo->num_vifs-- > 0) {
- net_ring = create_net_vif(p->domain);
+ net_vif = create_net_vif(p->domain);
+ net_ring = net_vif->net_ring;
if (!net_ring) panic("no network ring!\n");
}
- virt_startinfo_addr->net_rings = p->net_ring_base;
+
+/* XXX SMH: horrible hack to convert hypervisor VAs in SHIP to guest VAs */
+#define SH2G(_x) (meminfo->virt_shinfo_addr | (((unsigned long)(_x)) & 0xFFF))
+
+ virt_startinfo_addr->net_rings = (net_ring_t *)SH2G(p->net_ring_base);
virt_startinfo_addr->num_net_rings = p->num_net_vifs;
/* Add block io interface */
- virt_startinfo_addr->blk_ring = p->blk_ring_base;
+ virt_startinfo_addr->blk_ring = (blk_ring_t *)SH2G(p->blk_ring_base);
- /* i do not think this has to be done any more, temporary */
- /* We tell OS about any modules we were given. */
- if ( nr_mods > 1 )
- {
- virt_startinfo_addr->mod_start =
- (mod[1].mod_start-mod[0].mod_start-12) + meminfo->virt_load_addr;
- virt_startinfo_addr->mod_len =
- mod[nr_mods-1].mod_end - mod[1].mod_start;
- }
-
- /* temporary, meminfo->cmd_line just needs to be copied info start info */
dst = virt_startinfo_addr->cmd_line;
if ( mod[0].string )
{
/* Reinstate the caller's page tables. */
__asm__ __volatile__ (
- "mov %%eax,%%cr3"
- : : "a" (__pa(pagetable_ptr(current->mm.pagetable))));
+ "mov %%eax,%%cr3" : : "a" (pagetable_val(current->mm.pagetable)));
__sti();
+
new_thread(p,
(unsigned long)meminfo->virt_load_addr,
(unsigned long)virt_stack_addr,
if ( err )
{
+ page = frame_table + (cur.ptr >> PAGE_SHIFT);
+ printk(KERN_ALERT "bd240 debug: Update request %d\n", cur.ptr & (sizeof(l1_pgentry_t) - 1));
+ printk(KERN_ALERT "bd240 debug: Update request %lx, %lx\n", cur.ptr, cur.val);
+ printk(KERN_ALERT "bd240 debug: Page flags %lx\n", page->flags);
+
kill_domain_with_errmsg("Illegal page update request");
}
return(0);
}
+/*
+ * Apply @count page-table update requests from the array @cur on behalf of
+ * the current domain.  The two low bits of each request's 'ptr' (the bits
+ * below sizeof(l1_pgentry_t) alignment) demultiplex the operation type
+ * (PGREQ_NORMAL / PGREQ_UNCHECKED_UPDATE / PGREQ_EXTENDED_COMMAND).  Any
+ * request that fails validation kills the calling domain.  Returns 0.
+ */
+int new_do_process_page_updates(page_update_request_t * cur, int count)
+{
+    unsigned long flags, pfn;
+    struct pfn_info *page;
+    int err = 0, i;
+
+    for ( i = 0; i < count; i++ )
+    {
+        pfn = cur->ptr >> PAGE_SHIFT;
+        if ( pfn >= max_page )
+        {
+            MEM_LOG("Page out of range (%08lx > %08lx)", pfn, max_page);
+            kill_domain_with_errmsg("Page update request out of range");
+        }
+
+        /* Assume failure; each legal case below is responsible for
+         * clearing err on success. */
+        err = 1;
+
+        /* Least significant bits of 'ptr' demux the operation type. */
+        switch ( cur->ptr & (sizeof(l1_pgentry_t)-1) )
+        {
+
+            /*
+             * PGREQ_NORMAL: Normal update to any level of page table.
+             */
+        case PGREQ_NORMAL:
+            page = frame_table + pfn;
+            flags = page->flags;
+
+            printk(KERN_ALERT "bd240 debug: normal update\n");
+
+            /* The target frame must belong to the requesting domain. */
+            if ( (flags & PG_domain_mask) == current->domain )
+            {
+                printk(KERN_ALERT "bd240 debug: normal update inside\n");
+                switch ( (flags & PG_type_mask) )
+                {
+                case PGT_l1_page_table:
+                    err = mod_l1_entry(cur->ptr, mk_l1_pgentry(cur->val));
+                    break;
+                case PGT_l2_page_table:
+                    err = mod_l2_entry(cur->ptr, mk_l2_pgentry(cur->val));
+                    break;
+                default:
+                    MEM_LOG("Update to non-pt page %08lx", cur->ptr);
+                    break;
+                }
+            }
+
+            printk(KERN_ALERT "bd240 debug: normal update finish\n");
+
+            break;
+
+            /*
+             * PGREQ_UNCHECKED_UPDATE: Make an unchecked update to a
+             * bottom-level page-table entry.
+             * Restrictions apply:
+             *  1. Update only allowed by domain 0.
+             *  2. Update must be to a level-1 pte belonging to dom0.
+             */
+        case PGREQ_UNCHECKED_UPDATE:
+            cur->ptr &= ~(sizeof(l1_pgentry_t) - 1);
+            page = frame_table + pfn;
+            flags = page->flags;
+            /* NOTE(review): this relies on DOM0 having domain id 0 — OR-ing
+             * the caller's id into flags leaves PGT_l1_page_table unchanged
+             * only for domain 0 on a dom0-owned l1 frame.  Confirm
+             * PG_domain_mask and PG_type_mask occupy disjoint bits. */
+            if ( (flags | current->domain) == PGT_l1_page_table )
+            {
+
+                *(unsigned long *)map_domain_mem(cur->ptr) = cur->val;
+                err = 0;
+            }
+            else
+            {
+                MEM_LOG("UNCHECKED_UPDATE: Bad domain %d, or"
+                        " bad pte type %08lx", current->domain, flags);
+            }
+            break;
+
+            /*
+             * PGREQ_EXTENDED_COMMAND: Extended command is specified
+             * in the least-siginificant bits of the 'value' field.
+             */
+        case PGREQ_EXTENDED_COMMAND:
+            cur->ptr &= ~(sizeof(l1_pgentry_t) - 1);
+            err = do_extended_command(cur->ptr, cur->val);
+            break;
+
+        default:
+            MEM_LOG("Invalid page update command %08lx", cur->ptr);
+            break;
+        }
+
+        /* Any failed request is fatal to the requesting domain. */
+        if ( err )
+        {
+            page = frame_table + (cur->ptr >> PAGE_SHIFT);
+            printk(KERN_ALERT "bd240 debug: Update request %lx\n", cur->ptr & (sizeof(l1_pgentry_t) - 1));
+            printk(KERN_ALERT "bd240 debug: Update request %lx, %lx\n", cur->ptr, cur->val);
+            printk(KERN_ALERT "bd240 debug: Page flags %lx\n", page->flags);
+
+            kill_domain_with_errmsg("Illegal page update request");
+        }
+
+        cur++;
+    }
+
+    /* Honour any TLB flush queued for this CPU by reloading CR3. */
+    if ( tlb_flush[smp_processor_id()] )
+    {
+        tlb_flush[smp_processor_id()] = 0;
+        __asm__ __volatile__ (
+            "movl %%eax,%%cr3" : :
+            "a" (pagetable_val(current->mm.pagetable)));
+    }
+
+    return(0);
+}
+/* DOM0_NEWDOMAIN parameters: memory_kb and num_vifs are inputs; domain
+ * and pg_head are filled in by the hypervisor on return (see do_dom0_op,
+ * which copies them back with copy_to_user). */
typedef struct dom0_newdomain_st
{
-    unsigned int memory_kb;
+    unsigned int domain; // return parameter
+    unsigned int memory_kb;
    unsigned int num_vifs; // temporary
-    unsigned int domain;
+    unsigned long pg_head; // return parameter
} dom0_newdomain_t;
typedef struct dom0_killdomain_st
+/* Domain-launch descriptor passed from dom0 user space.  pgt_update_arr
+ * holds the machine address of a page_update_request_t array (it is
+ * mapped with map_domain_mem on the hypervisor side) and num_pgt_updates
+ * is the number of entries in it. */
typedef struct domain_launch
{
-    unsigned long domain;
+    unsigned int domain;
    unsigned long l2_pgt_addr;
    unsigned long virt_load_addr;
    unsigned long virt_shinfo_addr;
    unsigned long virt_startinfo_addr;
+    unsigned long pgt_update_arr;
+    unsigned long num_pgt_updates;
    unsigned int num_vifs;
    char cmd_line[MAX_CMD_LEN];
} dom_meminfo_t;
#define XENO_BASE "xeno" // proc file name defs should be in separate .h
#define DOM0_CMD_INTF "dom0_cmd"
#define DOM0_FT "frame_table"
-#define DOM0_NEWDOM "new_dom_id"
+#define DOM0_NEWDOM "new_dom_data"
#define MAX_LEN 16
#define DOM_DIR "dom"
/* proc write handler: user space hands back a dom_mem_t describing a
 * previously mapped region so it can be unmapped.  Returns the number of
 * bytes consumed on success, -1 if the unmap fails. */
static ssize_t dom_mem_write(struct file * file, const char * buff,
	size_t size , loff_t * off)
{
-    unsigned long addr;
-    proc_memdata_t * mem_data = (proc_memdata_t *)((struct proc_dir_entry *)file->f_dentry->d_inode->u.generic_ip)->data;
+    dom_mem_t mem_data;
-    copy_from_user(&addr, (unsigned long *)buff, sizeof(addr));
+    /* Reject faulting user buffers instead of acting on garbage. */
+    if(copy_from_user(&mem_data, (dom_mem_t *)buff, sizeof(dom_mem_t)))
+        return -EFAULT;
-    if(direct_disc_unmap(addr, mem_data->pfn, mem_data->tot_pages) == 0){
-        return sizeof(addr);
+    if(direct_disc_unmap(mem_data.vaddr, mem_data.start_pfn,
+			mem_data.tot_pages) == 0){
+        /* Fix: was sizeof(sizeof(dom_mem_t)), i.e. sizeof(size_t). */
+        return sizeof(dom_mem_t);
    } else {
        return -1;
    }
/* remap the range using xen specific routines */
addr = direct_mmap(mem_data->pfn << PAGE_SHIFT, mem_data->tot_pages << PAGE_SHIFT, prot, 0, 0);
- printk(KERN_ALERT "bd240 debug: dom_mem_read: %lx, %lx @ %lx\n", mem_data->pfn << PAGE_SHIFT, mem_data->tot_pages << PAGE_SHIFT, addr);
copy_to_user((unsigned long *)buff, &addr, sizeof(addr));
- printk(KERN_ALERT "bd240 debug: exiting dom_mem_read\n");
-
return sizeof(addr);
-
}
struct file_operations dom_mem_ops = {
memdata->tot_pages = tot_pages;
file->data = memdata;
- printk(KERN_ALERT "bd240 debug: cmd setup dom mem: %lx, %d\n", memdata->pfn, memdata->tot_pages);
-
ret = 0;
break;
}
return ret;
}
-/* return dom id stored as data pointer to userspace */
-static int dom_id_read_proc(char *page, char **start, off_t off,
-    int count, int *eof, void *data)
+/* One-shot proc read handler: copy the dom0_newdomain_t recorded at
+ * domain creation out to user space, then tear down the proc entry and
+ * free the record.  The copy is checked FIRST so a faulting read does
+ * not destroy the data before user space ever sees it. */
+static ssize_t dom_data_read(struct file * file, char * buff, size_t size, loff_t * off)
{
-    char arg[16];
-    sprintf(arg, "%d", (int)data);
-    strcpy(page, arg);
+    dom0_newdomain_t * dom_data = (dom0_newdomain_t *)
+        ((struct proc_dir_entry *)file->f_dentry->d_inode->u.generic_ip)->data;
+
+    if(copy_to_user((dom0_newdomain_t *)buff, dom_data, sizeof(dom0_newdomain_t)))
+        return -EFAULT;
+
    remove_proc_entry(DOM0_NEWDOM, xeno_base);
-    return sizeof(unsigned int);
+
+    kfree(dom_data);
+
+    return sizeof(dom0_newdomain_t);
}
+/* fops for the DOM0_NEWDOM proc entry: a single read returns the new
+ * domain's parameters and removes the entry (see dom_data_read). */
+struct file_operations newdom_data_fops = {
+    read: dom_data_read,
+};
+
/* proc command handler: forward dom0_op requests to the hypervisor, or
 * (for MAP_DOM_MEM) service them locally.  On DOM0_NEWDOMAIN success the
 * returned domain parameters are stashed in a kmalloc'd record that the
 * DOM0_NEWDOM proc entry hands to user space (and frees) on read. */
static int cmd_write_proc(struct file *file, const char *buffer,
	u_long count, void *data)
{
    dom0_op_t op;
    int ret = 0;
    struct proc_dir_entry * new_dom_id;
+    dom0_newdomain_t * params;
+
+    if(copy_from_user(&op, buffer, sizeof(dom0_op_t)))
+        return -EFAULT;
    /* is the request intended for hypervisor? */
    if(op.cmd != MAP_DOM_MEM){
+
        ret = HYPERVISOR_dom0_op(&op);
        /* if new domain created, create proc entries */
        if(op.cmd == DOM0_NEWDOMAIN){
            create_proc_dom_entries(ret);
+            params = (dom0_newdomain_t *)kmalloc(sizeof(dom0_newdomain_t),
+                GFP_KERNEL);
+            /* Fix: kmalloc result was dereferenced unchecked. */
+            if(params != NULL){
+                params->memory_kb = op.u.newdomain.memory_kb;
+                params->pg_head = op.u.newdomain.pg_head;
+                params->num_vifs = op.u.newdomain.num_vifs;
+                params->domain = op.u.newdomain.domain;
+
        /* now notify user space of the new domain's id */
        new_dom_id = create_proc_entry(DOM0_NEWDOM, 0600, xeno_base);
        if ( new_dom_id != NULL )
        {
            new_dom_id->owner      = THIS_MODULE;
            new_dom_id->nlink      = 1;
-            new_dom_id->read_proc  = dom_id_read_proc;
-            new_dom_id->data       = (void *)ret;
+            new_dom_id->proc_fops  = &newdom_data_fops;
+            new_dom_id->data       = (void *)params;
        }
+                else
+                {
+                    kfree(params); /* no reader exists to free it later */
+                }
+            }
        }
    } else {
-        ret = dom_map_mem(op.u.reqdommem.domain, op.u.reqdommem.start_pfn, 
-            op.u.reqdommem.tot_pages); 
+        ret = dom_map_mem(op.u.dommem.domain, op.u.dommem.start_pfn, 
+            op.u.dommem.tot_pages); 
    }
out:
static int __init init_module(void)
{
-
frame_table = (frame_table_t *)start_info.frame_table;
frame_table_len = start_info.frame_table_len;
frame_table_pa = start_info.frame_table_pa;
do {
pte_t oldpage;
oldpage = ptep_get_and_clear(pte);
- /*
- printk(KERN_ALERT "bd240 debug: %lx - %lx\n", pte, phys_addr);
- */
direct_set_pte(pte, direct_mk_pte_phys(phys_addr, prot));
forget_pte(oldpage);
entry = find_direct(¤t->mm->context.direct_list, addr);
if(entry != ¤t->mm->context.direct_list){
list_add_tail(&dmmap->list, entry);
- printk(KERN_ALERT "bd240 debug: added node %lx, size %lx in the middle\n", dmmap->vm_start, size);
} else {
list_add_tail(&dmmap->list, ¤t->mm->context.direct_list);
- printk(KERN_ALERT "bd240 debug: added node %lx, size %lx at tail\n", dmmap->vm_start, size);
}
/* and perform the mapping */
return -1;
list_del(&node->list);
- printk(KERN_ALERT "bd240 debug: delisted %lx from dlist\n", node->vm_start);
kfree(node);
direct_zap_page_range(current->mm, addr, size);
struct list_head * curr;
struct list_head * direct_list = ¤t->mm->context.direct_list;
- printk(KERN_ALERT "bd240 debug: direct_disc_unmap\n");
-
curr = direct_list->next;
while(curr != direct_list){
node = list_entry(curr, direct_mmap_node_t, list);
if(curr == direct_list)
return -1;
- printk(KERN_ALERT "bd240 debug: direct_disc_unmap, deleted from direct_list\n");
-
list_del(&node->list);
kfree(node);
- printk(KERN_ALERT "bd240 debug: direct_disc_unmap, from %lx, tot_pages %lx\n", from, tot_pages);
-
while(count < tot_pages){
direct_zap_page_range(current->mm, from, PAGE_SIZE);
from += PAGE_SIZE;
-
/******************************************************************************
* dom0_ops.h
*
+/* DOM0_NEWDOMAIN parameters: memory_kb and num_vifs are inputs; domain
+ * and pg_head are filled in by the hypervisor on return. */
typedef struct dom0_newdomain_st
{
+    unsigned int domain;
    unsigned int memory_kb;
    unsigned int num_vifs; // temporary
-    unsigned int domain; // return parameter
+    unsigned long pg_head; // return parameter
} dom0_newdomain_t;
typedef struct dom0_killdomain_st
unsigned long ts_phy_addr;
} dom0_tsmap_t;
+/* Describes a domain memory region for map/unmap requests: vaddr is the
+ * user-space virtual address of the mapping, start_pfn the first machine
+ * frame, tot_pages the number of frames (see dom_mem_write/dom_map_mem). */
-typedef struct dom_mem_req
+typedef struct dom_mem 
{
    unsigned int domain;
+    unsigned long vaddr;
    unsigned long start_pfn;
    int tot_pages;
-} dom_mem_req_t;
+} dom_mem_t;
typedef struct domain_launch
{
unsigned long virt_load_addr;
unsigned long virt_shinfo_addr;
unsigned long virt_startinfo_addr;
+ unsigned long pgt_update_arr;
+ unsigned long num_pgt_updates;
+ unsigned int num_vifs;
char cmd_line[MAX_CMD_LEN];
} dom_meminfo_t;
dom0_newdomain_t newdomain;
dom0_killdomain_t killdomain;
dom0_tsmap_t mapdomts;
- dom_mem_req_t reqdommem;
+ dom_mem_t dommem;
+ dom_meminfo_t meminfo;
}
u;
} dom0_op_t;